import warnings
warnings.filterwarnings('ignore')
from skimage.feature import graycomatrix, graycoprops
from skimage.color import rgb2gray
from skimage import img_as_ubyte, feature
import os
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from sklearn.model_selection import train_test_split
class Preprocessor:
    """Extracts texture (GLCM/Haralick) and HOG features from images and
    loads labeled image datasets from a category-per-subfolder layout."""

    def __init__(self):
        pass

    def extract_features(self, image):
        """
        @brief Extract Haralick texture features and Histogram of Oriented Gradients (HOG) features from an image.
        @details The input image is first converted to grayscale, as the feature extraction methods require 2D grayscale images.
        The grayscale image is then rescaled from a float range (0.0 - 1.0 by default) to uint8 range (0-255).
        The Gray-Level Co-Occurrence Matrix (GLCM) of the grayscale image is computed, and Haralick texture features are extracted from the GLCM.
        HOG features are then computed from the grayscale image. Finally, the Haralick and HOG features are combined into a single feature vector.
        @param image The input image (RGB) from which features are to be extracted.
        @return A numpy array containing the combined Haralick and HOG feature vector.
        """
        # Grayscale conversion: both GLCM and HOG operate on 2D images.
        image_gray = rgb2gray(image)
        # graycomatrix with levels=256 requires integer pixel values in [0, 255].
        image_gray = img_as_ubyte(image_gray)
        # Single-distance (5 px), single-angle (0 rad) co-occurrence matrix.
        glcm = graycomatrix(image_gray, distances=[5], angles=[0],
                            levels=256, symmetric=True, normed=True)
        # Five Haralick descriptors; [0, 0] selects the only (distance, angle) cell.
        haralick_features = np.array([
            graycoprops(glcm, prop)[0, 0]
            for prop in ('contrast', 'dissimilarity', 'homogeneity',
                         'energy', 'correlation')
        ])
        # HOG descriptor of the same grayscale image.
        hog_features = feature.hog(image_gray, orientations=9,
                                   pixels_per_cell=(8, 8),
                                   cells_per_block=(2, 2),
                                   block_norm='L2-Hys')
        # One flat feature vector: [haralick..., hog...].
        return np.concatenate((haralick_features, hog_features))

    def load_images_from_folder(self, folder_name, categories, image_size=(150, 150, 3)):
        """
        Load images from a specified folder, resize them to the same size, and
        extract a feature vector for each.

        Args:
            folder_name (str): The folder in which the images are stored.
            categories (list of str): The categories of images (one subfolder each).
            image_size (tuple): The size to which to resize the images.

        Returns:
            flat_data_arr (list): The list of feature vectors, one per image.
            target_arr (list): The list of target category indices for each image.
            img_resized: The last resized image processed, or None if no image
                was processed (fix: previously this raised NameError in that case).
        """
        flat_data_arr = []
        target_arr = []
        img_resized = None  # fix: avoid NameError when no image is successfully read
        for i, category in enumerate(categories):
            print(f'Loading... category: {category}')
            category_path = os.path.join(folder_name, category)
            if not os.path.isdir(category_path):
                print(f'Category path {category_path} not found')
                continue
            for img_file in os.listdir(category_path):
                img_file_path = os.path.join(category_path, img_file)
                # Skip macOS metadata files up front instead of relying on
                # imread to fail on them (original filtered only in `except`).
                if ".DS_Store" in img_file_path:
                    continue
                try:
                    img_array = imread(img_file_path)
                    img_resized = resize(img_array, image_size)
                    flat_data_arr.append(self.extract_features(img_resized))
                    target_arr.append(i)
                except Exception as e:
                    # Best-effort loading: report the bad file and keep going.
                    print(f'Error occurred while processing file {img_file_path}. Error message: {e}')
                    continue
            print(f'Loaded category: {category} successfully')
        return flat_data_arr, target_arr, img_resized

    def preprocess(self, dataset='dataset_18', categories=None):
        """
        Load and featurize the dataset.

        Args:
            dataset (str): Root folder of the dataset.
            categories (list of str or None): Category subfolder names; defaults
                to ['covid', 'normal'] (None sentinel avoids a mutable default).

        Returns:
            (np.ndarray, np.ndarray): Feature matrix and target vector.
        """
        if categories is None:
            categories = ['covid', 'normal']
        flat_data, target, _ = self.load_images_from_folder(dataset, categories)
        return np.array(flat_data), np.array(target)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import xgboost as xgb
from bayes_opt import BayesianOptimization
import joblib
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
class Classifier:
    """
    @brief Binary classifier supporting XGBoost, SVM and Random Forest models,
    with Bayesian hyperparameter optimization.
    @param validation (tuple): Validation datasets used as eval_set in XGBoost training.
    """

    def __init__(self, validation):
        """
        @brief Constructor method. Initializes main attributes.
        @param validation: list of (X, y) tuples passed to XGBoost as eval_set.
        """
        self.model = None          # reserved; the fitted estimator lives in self.pipeline
        self.pipeline = None       # scaler + classifier Pipeline, set by train()
        self.best_params = None    # best hyperparameters found by optimize()
        self.validation = validation
        self.model_type = None     # fix: initialize so custom_cross_val never hits AttributeError

    def custom_cross_val(self, pipeline, X, y, n_splits=5, random_state=1):
        """
        @brief Performs cross validation given a pipeline, features and targets.
        @param pipeline: scikit-learn Pipeline object
        @param X: numpy array or pandas DataFrame with features
        @param y: numpy array or pandas Series with targets
        @param n_splits: number of cross-validation folds
        @param random_state: random seed for reproducibility
        @return The mean of accuracy scores across all folds.
        """
        kf = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)
        cv_scores = []
        # Manual CV loop so the XGBoost branch can pass fit-time kwargs
        # (eval_set / early stopping) through the pipeline.
        for train_index, val_index in kf.split(X):
            cv_X_train, cv_X_val = X[train_index], X[val_index]
            cv_y_train, cv_y_val = y[train_index], y[val_index]
            if self.model_type == "xgb":
                # NOTE(review): early_stopping_rounds as a fit() kwarg is
                # deprecated in newer xgboost releases — confirm installed version.
                pipeline.fit(cv_X_train, cv_y_train,
                             classifier__eval_set=[(cv_X_val, cv_y_val)],
                             classifier__early_stopping_rounds=5,
                             classifier__verbose=False)
            else:
                pipeline.fit(cv_X_train, cv_y_train)
            y_pred = pipeline.predict(cv_X_val)
            cv_scores.append(accuracy_score(cv_y_val, y_pred))
        return np.mean(cv_scores)

    def svm_func(self, C, gamma, X_train, y_train):
        """
        Trains an SVM model with given hyperparameters and returns cross-validation score.
        Params:
            C: Penalty parameter C of the error term.
            gamma: Kernel coefficient.
            X_train: training features
            y_train: training target
        Returns: Cross-validation score.
        """
        model = SVC(C=C, gamma=gamma, probability=True)
        pipeline = Pipeline([('scaler', StandardScaler()),
                             ('classifier', model)])
        return self.custom_cross_val(pipeline, X_train, y_train)

    def rf_func(self, n_estimators, max_depth, max_features, X_train, y_train):
        """
        Trains a Random Forest model with given hyperparameters and returns cross-validation score.
        Params:
            n_estimators: The number of trees in the forest.
            max_depth: The maximum depth of the tree.
            max_features: The number of features to consider when looking for the best split.
            X_train: training features
            y_train: training target
        Returns: Cross-validation score.
        """
        # The Bayesian optimizer proposes floats; cast to the ints sklearn expects.
        model = RandomForestClassifier(n_estimators=int(n_estimators),
                                       max_depth=int(max_depth),
                                       max_features=max_features,
                                       random_state=1)
        pipeline = Pipeline([('scaler', StandardScaler()),
                             ('classifier', model)])
        return self.custom_cross_val(pipeline, X_train, y_train)

    def xgb_func(self, n_estimators, learning_rate, max_depth, subsample, colsample_bytree, gamma, X_train, y_train):
        """
        @brief Trains a XGBoost model with given hyperparameters and returns cross-validation score.
        @param n_estimators: number of boosted trees to fit
        @param learning_rate: boosting learning rate
        @param max_depth: maximum depth of a tree
        @param subsample: subsample ratio of the training instances
        @param colsample_bytree: subsample ratio of columns when constructing each tree
        @param gamma: minimum loss reduction required to make a split
        @param X_train: training features
        @param y_train: training target
        @return Cross-validation score.
        """
        n_estimators = int(n_estimators)
        max_depth = int(max_depth)
        model = xgb.XGBClassifier(n_estimators=n_estimators,
                                  learning_rate=learning_rate,
                                  max_depth=max_depth,
                                  subsample=subsample,
                                  colsample_bytree=colsample_bytree,
                                  gamma=gamma,
                                  eval_metric='logloss',
                                  objective='binary:logistic',
                                  tree_method='gpu_hist', gpu_id=0)
        pipeline = Pipeline([('scaler', StandardScaler()),
                             ('classifier', model)])
        return self.custom_cross_val(pipeline, X_train, y_train)

    def optimize(self, X_train, y_train, model_type='xgb'):
        """
        Uses Bayesian optimization to find the best hyperparameters for the model.
        Params:
            X_train: training features
            y_train: training target
            model_type: The type of model to use. Can be 'xgb', 'svm', or 'rf'.
        Raises:
            ValueError: if model_type is not one of 'xgb', 'svm', 'rf'
                (fix: previously an unknown type produced a NameError).
        """
        self.model_type = model_type
        if model_type == 'xgb':
            pbounds = {'learning_rate': (0.01, 0.2),
                       'n_estimators': (100, 1000),
                       'max_depth': (3, 7),
                       'subsample': (0.3, 0.7),
                       'colsample_bytree': (0.1, 0.5),
                       'gamma': (0, 5)}
            optimizer = BayesianOptimization(
                f=lambda n_estimators, learning_rate, max_depth, subsample, colsample_bytree, gamma: self.xgb_func(
                    n_estimators, learning_rate, max_depth, subsample, colsample_bytree, gamma, X_train, y_train),
                pbounds=pbounds,
                random_state=1,
            )
        elif model_type == 'svm':
            pbounds = {'C': (0.1, 100),
                       'gamma': (0.0001, 1)}
            optimizer = BayesianOptimization(
                f=lambda C, gamma: self.svm_func(
                    C, gamma, X_train, y_train),
                pbounds=pbounds,
                random_state=1,
            )
        elif model_type == 'rf':
            pbounds = {'n_estimators': (100, 1000),
                       'max_depth': (3, 7),
                       'max_features': (0.1, 0.5)}
            optimizer = BayesianOptimization(
                f=lambda n_estimators, max_depth, max_features: self.rf_func(
                    n_estimators, max_depth, max_features, X_train, y_train),
                pbounds=pbounds,
                random_state=1,
            )
        else:
            raise ValueError(f"Unknown model_type: {model_type!r}; expected 'xgb', 'svm', or 'rf'")
        optimizer.maximize(init_points=3, n_iter=2)
        self.best_params = optimizer.max['params']

    def train(self, X_train, y_train, model_type='xgb'):
        """
        Train the model with the best hyperparameters found by optimize().
        Params:
            X_train: training features
            y_train: training target
            model_type: The type of model to use. Can be 'xgb', 'svm', or 'rf'.
        Returns: XGBoost evaluation results dict (for 'xgb'), otherwise None.
        Raises:
            ValueError: if model_type is not one of 'xgb', 'svm', 'rf'
                (fix: previously fit() was called on a None pipeline).
        """
        self.model_type = model_type  # keep in sync for any later custom_cross_val call
        if model_type == 'xgb':
            # Cast optimizer-proposed floats back to the ints xgboost expects.
            self.best_params['n_estimators'] = int(self.best_params['n_estimators'])
            self.best_params['max_depth'] = int(self.best_params['max_depth'])
            self.pipeline = Pipeline([('scaler', StandardScaler()),
                                      ('classifier', xgb.XGBClassifier(**self.best_params,
                                                                       eval_metric='logloss',
                                                                       objective='binary:logistic',
                                                                       tree_method='gpu_hist', gpu_id=0))])
            self.pipeline.fit(X_train, y_train,
                              classifier__eval_set=self.validation)
            return self.pipeline.named_steps['classifier'].evals_result()
        elif model_type == 'svm':
            self.pipeline = Pipeline([('scaler', StandardScaler()),
                                      ('classifier', SVC(**self.best_params, probability=True))])
        elif model_type == 'rf':
            self.best_params['n_estimators'] = int(self.best_params['n_estimators'])
            self.best_params['max_depth'] = int(self.best_params['max_depth'])
            self.pipeline = Pipeline([('scaler', StandardScaler()),
                                      ('classifier', RandomForestClassifier(**self.best_params))])
        else:
            raise ValueError(f"Unknown model_type: {model_type!r}; expected 'xgb', 'svm', or 'rf'")
        self.pipeline.fit(X_train, y_train)
        return None

    def predict(self, X):
        """
        @brief Predicts target with the trained model.
        @param X: numpy array or pandas DataFrame with features
        @return Predicted target values.
        """
        return self.pipeline.predict(X)

    def save_model(self, file_path):
        """
        @brief Saves the trained pipeline to a file.
        @param file_path: path to the output file
        """
        if self.pipeline is not None:
            joblib.dump(self.pipeline, file_path)
        else:
            print("Cannot save an uninitialized model. Please train the model first.")
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score,classification_report
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage.io import imread
import os
class Evaluator:
    """
    @brief Class for evaluating a classifier model and making predictions.
    @param model: The classifier model to evaluate
    @param preprocessor: The preprocessor used for the model
    """

    def __init__(self, model, preprocessor):
        """
        @brief Constructor method. Initializes main attributes.
        @param model: Trained model for evaluation.
        @param preprocessor: Preprocessor for data preparation.
        """
        self.model = model
        self.preprocessor = preprocessor

    def evaluate(self, X_test, y_test):
        """
        @brief Evaluates the model using a test set and calculates evaluation metrics.
        @param X_test: Test features.
        @param y_test: True target values.
        @return A dict containing the evaluation metrics (in percent).
        """
        y_pred = self.model.predict(X_test)
        # fix: calculate_metrics now actually returns the promised dict,
        # so this no longer returns None.
        return self.calculate_metrics(y_test, y_pred)

    def plot_log_loss(self, evals_result):
        """
        @brief Plots the log loss curve of the model during its training.
        @param evals_result: Dictionary containing the evaluation results
            (xgboost evals_result format: {'validation_0': {'logloss': [...]}, ...}).
        """
        epochs = len(evals_result['validation_0']['logloss'])
        x_axis = range(0, epochs)
        fig, ax = plt.subplots()
        ax.plot(x_axis, evals_result['validation_0']['logloss'], label='Train')
        ax.plot(x_axis, evals_result['validation_1']['logloss'], label='Validation')
        ax.legend()
        plt.ylabel('Log Loss')
        plt.title('XGBoost Log Loss')
        plt.show()

    def predict_images_in_dir(self, directory, Categories):
        """
        @brief Makes predictions for all .jpg/.png images in a directory,
        displaying each image with per-category probabilities.
        @param directory: The directory path containing the images.
        @param Categories: The categories that the images can belong to.
        """
        for image_name in os.listdir(directory):
            # endswith accepts a tuple — one call instead of an `or` chain.
            if image_name.endswith((".jpg", ".png")):
                print(f"Image Name: {image_name}")
                image_path = os.path.join(directory, image_name)
                img = imread(image_path)
                plt.imshow(img)
                plt.show()
                img_resized = resize(img, (150, 150, 3))
                features = self.preprocessor.extract_features(img_resized)
                batch = [features]  # model expects a 2D batch of feature vectors
                probability = self.model.predict_proba(batch)
                for ind, val in enumerate(Categories):
                    print(f'{val} = {probability[0][ind]*100}%')
                print("The predicted image is : " + Categories[self.model.predict(batch)[0]])
                print("\n")
            else:
                print(f"Ignored {image_name}")

    def calculate_metrics(self, y_test, y_pred):
        """
        @brief Calculates and prints several evaluation metrics.
        @param y_test: True target values.
        @param y_pred: Predicted target values.
        @return A dictionary containing the evaluation metrics (in percent).
        """
        accuracy = accuracy_score(y_test, y_pred)*100
        precision = precision_score(y_test, y_pred, average='weighted')*100
        recall = recall_score(y_test, y_pred, average='weighted')*100
        f1 = f1_score(y_test, y_pred, average='weighted')*100
        auc_roc = roc_auc_score(y_test, y_pred)*100
        print(classification_report(y_test, y_pred))
        print(f"Accuracy: {accuracy}%\nPrecision: {precision}%\nRecall: {recall}%\nF1 Score: {f1}%\nAUC-ROC: {auc_roc}%")
        # fix: the docstring always promised a dict, but nothing was returned.
        return {'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f1': f1,
                'auc_roc': auc_roc}
import numpy as np
from skimage.io import imread
from skimage.transform import resize
import joblib
class Inference:
    """Runs predictions on image files using a persisted, trained pipeline."""

    def __init__(self, model_path, preprocessor):
        """Load the trained pipeline from `model_path` and keep the preprocessor."""
        self.model = joblib.load(model_path)
        self.preprocessor = preprocessor

    def preprocess_input(self, input_data):
        """Turn an image path into a (1, n_features) array matching training.

        Reads the image, resizes it to 150x150x3, and extracts the same
        feature vector the preprocessor produced during training.
        """
        raw_image = imread(input_data)
        scaled_image = resize(raw_image, (150, 150, 3))
        feature_vector = self.preprocessor.extract_features(scaled_image)
        return np.array([feature_vector])

    def predict(self, input_data):
        """Return the model's prediction for the image at `input_data`."""
        return self.model.predict(self.preprocess_input(input_data))
# Initialize the classes
preprocessor = Preprocessor()
# Load the images and labels
# NOTE(review): `preprocess` is defined on Preprocessor outside this view; it
# appears to return the feature matrix X and label vector y built from every
# image under ./dataset_18 -- confirm against its definition.
X, y = preprocessor.preprocess('./dataset_18')
Loading... category: covid Loaded category: covid successfully Loading... category: normal Loaded category: normal successfully
# Splitting the data into training and testing sets
# 70% train / 30% held out; stratify=y preserves the class ratio in each split.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1,
stratify=y)
# Split the held-out 30% evenly into test and validation sets
# (15% / 15% of the full data), again stratified by label.
x_test, x_val, y_test, y_val = train_test_split(
x_test, y_test, test_size=0.5, random_state=1, stratify=y_test)
# The classifier receives (train, validation) pairs for monitoring during fit.
clf = Classifier(validation=[(x_train, y_train), (x_val, y_val)])
# Optimize the hyperparameters
# "xgb" selects the XGBoost model; the search trace is printed below.
clf.optimize(x_train, y_train,"xgb")
| iter | target | colsam... | gamma | learni... | max_depth | n_esti... | subsample | ------------------------------------------------------------------------------------------------- | 1 | 0.696 | 0.2668 | 3.602 | 0.01002 | 4.209 | 232.1 | 0.3369 | | 2 | 0.703 | 0.1745 | 1.728 | 0.08539 | 5.155 | 477.3 | 0.5741 | | 3 | 0.7328 | 0.1818 | 4.391 | 0.0152 | 5.682 | 475.6 | 0.5235 | | 4 | 0.6822 | 0.4849 | 4.457 | 0.179 | 5.296 | 475.7 | 0.5276 | | 5 | 0.7164 | 0.3391 | 1.592 | 0.136 | 4.794 | 255.0 | 0.4212 | =================================================================================================
# Train the classifier with the optimized hyperparameters
# `history` records per-iteration log-loss on the train/validation sets.
history = clf.train(x_train, y_train,"xgb")
# Persist the fitted XGBoost pipeline for later standalone inference.
clf.save_model("./mlModels/xgbModel.joblib")
[0] validation_0-logloss:0.69093 validation_1-logloss:0.69056 [1] validation_0-logloss:0.68832 validation_1-logloss:0.68665 [2] validation_0-logloss:0.69014 validation_1-logloss:0.68879 [3] validation_0-logloss:0.68813 validation_1-logloss:0.68642 [4] validation_0-logloss:0.68644 validation_1-logloss:0.68442 [5] validation_0-logloss:0.68808 validation_1-logloss:0.68636 [6] validation_0-logloss:0.68949 validation_1-logloss:0.68945 [7] validation_0-logloss:0.69041 validation_1-logloss:0.68930 [8] validation_0-logloss:0.68816 validation_1-logloss:0.68676 [9] validation_0-logloss:0.68972 validation_1-logloss:0.68873 [10] validation_0-logloss:0.68768 validation_1-logloss:0.68641 [11] validation_0-logloss:0.68972 validation_1-logloss:0.68874 [12] validation_0-logloss:0.68771 validation_1-logloss:0.68644 [13] validation_0-logloss:0.68579 validation_1-logloss:0.68424 [14] validation_0-logloss:0.68769 validation_1-logloss:0.68643 [15] validation_0-logloss:0.68590 validation_1-logloss:0.68436 [16] validation_0-logloss:0.68779 validation_1-logloss:0.68654 [17] validation_0-logloss:0.68857 validation_1-logloss:0.68771 [18] validation_0-logloss:0.69058 validation_1-logloss:0.68998 [19] validation_0-logloss:0.69222 validation_1-logloss:0.69264 [20] validation_0-logloss:0.68855 validation_1-logloss:0.68868 [21] validation_0-logloss:0.69075 validation_1-logloss:0.69117 [22] validation_0-logloss:0.68859 validation_1-logloss:0.68873 [23] validation_0-logloss:0.68558 validation_1-logloss:0.68635 [24] validation_0-logloss:0.68364 validation_1-logloss:0.68413 [25] validation_0-logloss:0.68458 validation_1-logloss:0.68472 [26] validation_0-logloss:0.68252 validation_1-logloss:0.68246 [27] validation_0-logloss:0.68079 validation_1-logloss:0.68049 [28] validation_0-logloss:0.68270 validation_1-logloss:0.68266 [29] validation_0-logloss:0.68417 validation_1-logloss:0.68434 [30] validation_0-logloss:0.68604 validation_1-logloss:0.68645 [31] validation_0-logloss:0.68395 
validation_1-logloss:0.68409 [32] validation_0-logloss:0.68580 validation_1-logloss:0.68619 [33] validation_0-logloss:0.68137 validation_1-logloss:0.68271 [34] validation_0-logloss:0.68167 validation_1-logloss:0.68387 [35] validation_0-logloss:0.68352 validation_1-logloss:0.68592 [36] validation_0-logloss:0.68554 validation_1-logloss:0.68903 [37] validation_0-logloss:0.68458 validation_1-logloss:0.68772 [38] validation_0-logloss:0.68253 validation_1-logloss:0.68545 [39] validation_0-logloss:0.67991 validation_1-logloss:0.68344 [40] validation_0-logloss:0.68150 validation_1-logloss:0.68552 [41] validation_0-logloss:0.67836 validation_1-logloss:0.68146 [42] validation_0-logloss:0.67787 validation_1-logloss:0.68138 [43] validation_0-logloss:0.67587 validation_1-logloss:0.67922 [44] validation_0-logloss:0.67728 validation_1-logloss:0.68079 [45] validation_0-logloss:0.67368 validation_1-logloss:0.67737 [46] validation_0-logloss:0.67077 validation_1-logloss:0.67417 [47] validation_0-logloss:0.67284 validation_1-logloss:0.67644 [48] validation_0-logloss:0.67023 validation_1-logloss:0.67448 [49] validation_0-logloss:0.66828 validation_1-logloss:0.67249 [50] validation_0-logloss:0.66641 validation_1-logloss:0.67044 [51] validation_0-logloss:0.66793 validation_1-logloss:0.67298 [52] validation_0-logloss:0.66775 validation_1-logloss:0.67397 [53] validation_0-logloss:0.66598 validation_1-logloss:0.67201 [54] validation_0-logloss:0.66407 validation_1-logloss:0.66991 [55] validation_0-logloss:0.66244 validation_1-logloss:0.66808 [56] validation_0-logloss:0.66382 validation_1-logloss:0.67024 [57] validation_0-logloss:0.66543 validation_1-logloss:0.67281 [58] validation_0-logloss:0.66368 validation_1-logloss:0.67088 [59] validation_0-logloss:0.66549 validation_1-logloss:0.67288 [60] validation_0-logloss:0.66737 validation_1-logloss:0.67494 [61] validation_0-logloss:0.66538 validation_1-logloss:0.67293 [62] validation_0-logloss:0.66388 validation_1-logloss:0.67128 [63] 
validation_0-logloss:0.66517 validation_1-logloss:0.67265 [64] validation_0-logloss:0.66663 validation_1-logloss:0.67460 [65] validation_0-logloss:0.66555 validation_1-logloss:0.67342 [66] validation_0-logloss:0.66719 validation_1-logloss:0.67522 [67] validation_0-logloss:0.66862 validation_1-logloss:0.67680 [68] validation_0-logloss:0.66661 validation_1-logloss:0.67442 [69] validation_0-logloss:0.66412 validation_1-logloss:0.67298 [70] validation_0-logloss:0.66582 validation_1-logloss:0.67485 [71] validation_0-logloss:0.66390 validation_1-logloss:0.67389 [72] validation_0-logloss:0.66210 validation_1-logloss:0.67192 [73] validation_0-logloss:0.66021 validation_1-logloss:0.66985 [74] validation_0-logloss:0.65865 validation_1-logloss:0.66813 [75] validation_0-logloss:0.65994 validation_1-logloss:0.67038 [76] validation_0-logloss:0.66155 validation_1-logloss:0.67215 [77] validation_0-logloss:0.65998 validation_1-logloss:0.67042 [78] validation_0-logloss:0.66169 validation_1-logloss:0.67230 [79] validation_0-logloss:0.65988 validation_1-logloss:0.67096 [80] validation_0-logloss:0.66150 validation_1-logloss:0.67273 [81] validation_0-logloss:0.66326 validation_1-logloss:0.67464 [82] validation_0-logloss:0.66183 validation_1-logloss:0.67309 [83] validation_0-logloss:0.66017 validation_1-logloss:0.67127 [84] validation_0-logloss:0.66041 validation_1-logloss:0.67009 [85] validation_0-logloss:0.65884 validation_1-logloss:0.66837 [86] validation_0-logloss:0.65707 validation_1-logloss:0.66652 [87] validation_0-logloss:0.65873 validation_1-logloss:0.66834 [88] validation_0-logloss:0.65738 validation_1-logloss:0.66686 [89] validation_0-logloss:0.65917 validation_1-logloss:0.66902 [90] validation_0-logloss:0.65724 validation_1-logloss:0.66783 [91] validation_0-logloss:0.65872 validation_1-logloss:0.67033 [92] validation_0-logloss:0.65730 validation_1-logloss:0.66877 [93] validation_0-logloss:0.65594 validation_1-logloss:0.66728 [94] validation_0-logloss:0.65720 
validation_1-logloss:0.66867 [95] validation_0-logloss:0.65578 validation_1-logloss:0.66711 [96] validation_0-logloss:0.65476 validation_1-logloss:0.66597 [97] validation_0-logloss:0.65284 validation_1-logloss:0.66358 [98] validation_0-logloss:0.65426 validation_1-logloss:0.66515 [99] validation_0-logloss:0.65497 validation_1-logloss:0.66610 [100] validation_0-logloss:0.65248 validation_1-logloss:0.66292 [101] validation_0-logloss:0.65373 validation_1-logloss:0.66429 [102] validation_0-logloss:0.65532 validation_1-logloss:0.66604 [103] validation_0-logloss:0.65385 validation_1-logloss:0.66458 [104] validation_0-logloss:0.65496 validation_1-logloss:0.66580 [105] validation_0-logloss:0.65337 validation_1-logloss:0.66434 [106] validation_0-logloss:0.65220 validation_1-logloss:0.66305 [107] validation_0-logloss:0.65088 validation_1-logloss:0.66163 [108] validation_0-logloss:0.65202 validation_1-logloss:0.66290 [109] validation_0-logloss:0.65012 validation_1-logloss:0.66141 [110] validation_0-logloss:0.64801 validation_1-logloss:0.65898 [111] validation_0-logloss:0.64684 validation_1-logloss:0.65767 [112] validation_0-logloss:0.64582 validation_1-logloss:0.65654 [113] validation_0-logloss:0.64702 validation_1-logloss:0.65788 [114] validation_0-logloss:0.64790 validation_1-logloss:0.65893 [115] validation_0-logloss:0.64662 validation_1-logloss:0.65676 [116] validation_0-logloss:0.64537 validation_1-logloss:0.65537 [117] validation_0-logloss:0.64636 validation_1-logloss:0.65647 [118] validation_0-logloss:0.64494 validation_1-logloss:0.65490 [119] validation_0-logloss:0.64392 validation_1-logloss:0.65377 [120] validation_0-logloss:0.64261 validation_1-logloss:0.65284 [121] validation_0-logloss:0.64196 validation_1-logloss:0.65211 [122] validation_0-logloss:0.64108 validation_1-logloss:0.65112 [123] validation_0-logloss:0.63999 validation_1-logloss:0.64988 [124] validation_0-logloss:0.64098 validation_1-logloss:0.65101 [125] validation_0-logloss:0.63981 
validation_1-logloss:0.64952 [126] validation_0-logloss:0.64092 validation_1-logloss:0.65078 [127] validation_0-logloss:0.64007 validation_1-logloss:0.64877 [128] validation_0-logloss:0.63852 validation_1-logloss:0.64743 [129] validation_0-logloss:0.63935 validation_1-logloss:0.64835 [130] validation_0-logloss:0.63837 validation_1-logloss:0.64726 [131] validation_0-logloss:0.63889 validation_1-logloss:0.64783 [132] validation_0-logloss:0.63998 validation_1-logloss:0.64904 [133] validation_0-logloss:0.64116 validation_1-logloss:0.65035 [134] validation_0-logloss:0.64212 validation_1-logloss:0.65190 [135] validation_0-logloss:0.64313 validation_1-logloss:0.65349 [136] validation_0-logloss:0.64399 validation_1-logloss:0.65443 [137] validation_0-logloss:0.64360 validation_1-logloss:0.65534 [138] validation_0-logloss:0.64486 validation_1-logloss:0.65672 [139] validation_0-logloss:0.64409 validation_1-logloss:0.65502 [140] validation_0-logloss:0.64534 validation_1-logloss:0.65637 [141] validation_0-logloss:0.64669 validation_1-logloss:0.65777 [142] validation_0-logloss:0.64561 validation_1-logloss:0.65661 [143] validation_0-logloss:0.64405 validation_1-logloss:0.65493 [144] validation_0-logloss:0.64458 validation_1-logloss:0.65548 [145] validation_0-logloss:0.64604 validation_1-logloss:0.65704 [146] validation_0-logloss:0.64417 validation_1-logloss:0.65483 [147] validation_0-logloss:0.64510 validation_1-logloss:0.65672 [148] validation_0-logloss:0.64657 validation_1-logloss:0.65830 [149] validation_0-logloss:0.64796 validation_1-logloss:0.65980 [150] validation_0-logloss:0.64656 validation_1-logloss:0.65829 [151] validation_0-logloss:0.64515 validation_1-logloss:0.65678 [152] validation_0-logloss:0.64423 validation_1-logloss:0.65601 [153] validation_0-logloss:0.64290 validation_1-logloss:0.65536 [154] validation_0-logloss:0.64134 validation_1-logloss:0.65368 [155] validation_0-logloss:0.64215 validation_1-logloss:0.65455 [156] validation_0-logloss:0.64143 
validation_1-logloss:0.65282 [157] validation_0-logloss:0.63949 validation_1-logloss:0.65042 [158] validation_0-logloss:0.64008 validation_1-logloss:0.65138 [159] validation_0-logloss:0.64147 validation_1-logloss:0.65288 [160] validation_0-logloss:0.64215 validation_1-logloss:0.65360 [161] validation_0-logloss:0.64319 validation_1-logloss:0.65498 [162] validation_0-logloss:0.64428 validation_1-logloss:0.65615 [163] validation_0-logloss:0.64234 validation_1-logloss:0.65593 [164] validation_0-logloss:0.64069 validation_1-logloss:0.65417 [165] validation_0-logloss:0.63952 validation_1-logloss:0.65291 [166] validation_0-logloss:0.64070 validation_1-logloss:0.65418 [167] validation_0-logloss:0.64112 validation_1-logloss:0.65294 [168] validation_0-logloss:0.64026 validation_1-logloss:0.65210 [169] validation_0-logloss:0.63909 validation_1-logloss:0.65086 [170] validation_0-logloss:0.64012 validation_1-logloss:0.65195 [171] validation_0-logloss:0.64066 validation_1-logloss:0.65252 [172] validation_0-logloss:0.63975 validation_1-logloss:0.65156 [173] validation_0-logloss:0.63820 validation_1-logloss:0.65067 [174] validation_0-logloss:0.63621 validation_1-logloss:0.64900 [175] validation_0-logloss:0.63741 validation_1-logloss:0.65034 [176] validation_0-logloss:0.63686 validation_1-logloss:0.65057 [177] validation_0-logloss:0.63751 validation_1-logloss:0.65126 [178] validation_0-logloss:0.63653 validation_1-logloss:0.65022 [179] validation_0-logloss:0.63764 validation_1-logloss:0.65139 [180] validation_0-logloss:0.63667 validation_1-logloss:0.65020 [181] validation_0-logloss:0.63785 validation_1-logloss:0.65145 [182] validation_0-logloss:0.63695 validation_1-logloss:0.65050 [183] validation_0-logloss:0.63836 validation_1-logloss:0.65199 [184] validation_0-logloss:0.63704 validation_1-logloss:0.65001 [185] validation_0-logloss:0.63581 validation_1-logloss:0.64871 [186] validation_0-logloss:0.63492 validation_1-logloss:0.64819 [187] validation_0-logloss:0.63583 
validation_1-logloss:0.64916 [188] validation_0-logloss:0.63649 validation_1-logloss:0.64985 [189] validation_0-logloss:0.63543 validation_1-logloss:0.64873 [190] validation_0-logloss:0.63637 validation_1-logloss:0.64972 [191] validation_0-logloss:0.63687 validation_1-logloss:0.65025 [192] validation_0-logloss:0.63757 validation_1-logloss:0.65100 [193] validation_0-logloss:0.63677 validation_1-logloss:0.64973 [194] validation_0-logloss:0.63782 validation_1-logloss:0.65084 [195] validation_0-logloss:0.63671 validation_1-logloss:0.64967 [196] validation_0-logloss:0.63548 validation_1-logloss:0.64837 [197] validation_0-logloss:0.63599 validation_1-logloss:0.64891 [198] validation_0-logloss:0.63540 validation_1-logloss:0.64829 [199] validation_0-logloss:0.63599 validation_1-logloss:0.64891 [200] validation_0-logloss:0.63547 validation_1-logloss:0.64836 [201] validation_0-logloss:0.63596 validation_1-logloss:0.64888 [202] validation_0-logloss:0.63523 validation_1-logloss:0.64811 [203] validation_0-logloss:0.63408 validation_1-logloss:0.64688 [204] validation_0-logloss:0.63525 validation_1-logloss:0.64812 [205] validation_0-logloss:0.63618 validation_1-logloss:0.64910 [206] validation_0-logloss:0.63466 validation_1-logloss:0.64750 [207] validation_0-logloss:0.63367 validation_1-logloss:0.64645 [208] validation_0-logloss:0.63380 validation_1-logloss:0.64528 [209] validation_0-logloss:0.63451 validation_1-logloss:0.64601 [210] validation_0-logloss:0.63371 validation_1-logloss:0.64519 [211] validation_0-logloss:0.63309 validation_1-logloss:0.64454 [212] validation_0-logloss:0.63394 validation_1-logloss:0.64543 [213] validation_0-logloss:0.63289 validation_1-logloss:0.64433 [214] validation_0-logloss:0.63157 validation_1-logloss:0.64308 [215] validation_0-logloss:0.63261 validation_1-logloss:0.64416 [216] validation_0-logloss:0.63131 validation_1-logloss:0.64280 [217] validation_0-logloss:0.63048 validation_1-logloss:0.64193 [218] validation_0-logloss:0.62998 
validation_1-logloss:0.64111 [219] validation_0-logloss:0.63071 validation_1-logloss:0.64186 [220] validation_0-logloss:0.63172 validation_1-logloss:0.64335 [221] validation_0-logloss:0.63235 validation_1-logloss:0.64401 [222] validation_0-logloss:0.63287 validation_1-logloss:0.64454 [223] validation_0-logloss:0.63355 validation_1-logloss:0.64524 [224] validation_0-logloss:0.63263 validation_1-logloss:0.64432 [225] validation_0-logloss:0.63424 validation_1-logloss:0.64600 [226] validation_0-logloss:0.63547 validation_1-logloss:0.64782 [227] validation_0-logloss:0.63439 validation_1-logloss:0.64669 [228] validation_0-logloss:0.63482 validation_1-logloss:0.64714 [229] validation_0-logloss:0.63576 validation_1-logloss:0.64812 [230] validation_0-logloss:0.63669 validation_1-logloss:0.64908 [231] validation_0-logloss:0.63690 validation_1-logloss:0.64930 [232] validation_0-logloss:0.63592 validation_1-logloss:0.64829 [233] validation_0-logloss:0.63417 validation_1-logloss:0.64609 [234] validation_0-logloss:0.63470 validation_1-logloss:0.64664 [235] validation_0-logloss:0.63535 validation_1-logloss:0.64731 [236] validation_0-logloss:0.63626 validation_1-logloss:0.64825 [237] validation_0-logloss:0.63623 validation_1-logloss:0.64822 [238] validation_0-logloss:0.63563 validation_1-logloss:0.64760 [239] validation_0-logloss:0.63566 validation_1-logloss:0.64763 [240] validation_0-logloss:0.63563 validation_1-logloss:0.64760 [241] validation_0-logloss:0.63443 validation_1-logloss:0.64636 [242] validation_0-logloss:0.63497 validation_1-logloss:0.64697 [243] validation_0-logloss:0.63598 validation_1-logloss:0.64801 [244] validation_0-logloss:0.63472 validation_1-logloss:0.64672 [245] validation_0-logloss:0.63372 validation_1-logloss:0.64588 [246] validation_0-logloss:0.63390 validation_1-logloss:0.64607 [247] validation_0-logloss:0.63456 validation_1-logloss:0.64675 [248] validation_0-logloss:0.63458 validation_1-logloss:0.64651 [249] validation_0-logloss:0.63451 
validation_1-logloss:0.64644 [250] validation_0-logloss:0.63382 validation_1-logloss:0.64573 [251] validation_0-logloss:0.63314 validation_1-logloss:0.64458 [252] validation_0-logloss:0.63361 validation_1-logloss:0.64506 [253] validation_0-logloss:0.63343 validation_1-logloss:0.64488 [254] validation_0-logloss:0.63263 validation_1-logloss:0.64406 [255] validation_0-logloss:0.63215 validation_1-logloss:0.64423 [256] validation_0-logloss:0.63100 validation_1-logloss:0.64305 [257] validation_0-logloss:0.63097 validation_1-logloss:0.64302 [258] validation_0-logloss:0.63237 validation_1-logloss:0.64446 [259] validation_0-logloss:0.63231 validation_1-logloss:0.64440 [260] validation_0-logloss:0.63309 validation_1-logloss:0.64520 [261] validation_0-logloss:0.63391 validation_1-logloss:0.64604 [262] validation_0-logloss:0.63349 validation_1-logloss:0.64561 [263] validation_0-logloss:0.63454 validation_1-logloss:0.64669 [264] validation_0-logloss:0.63446 validation_1-logloss:0.64661 [265] validation_0-logloss:0.63436 validation_1-logloss:0.64650 [266] validation_0-logloss:0.63476 validation_1-logloss:0.64729 [267] validation_0-logloss:0.63343 validation_1-logloss:0.64593 [268] validation_0-logloss:0.63258 validation_1-logloss:0.64505 [269] validation_0-logloss:0.63340 validation_1-logloss:0.64590 [270] validation_0-logloss:0.63427 validation_1-logloss:0.64679 [271] validation_0-logloss:0.63523 validation_1-logloss:0.64777 [272] validation_0-logloss:0.63554 validation_1-logloss:0.64771 [273] validation_0-logloss:0.63558 validation_1-logloss:0.64776 [274] validation_0-logloss:0.63470 validation_1-logloss:0.64676 [275] validation_0-logloss:0.63474 validation_1-logloss:0.64680 [276] validation_0-logloss:0.63463 validation_1-logloss:0.64668 [277] validation_0-logloss:0.63557 validation_1-logloss:0.64765 [278] validation_0-logloss:0.63658 validation_1-logloss:0.64869 [279] validation_0-logloss:0.63747 validation_1-logloss:0.64960 [280] validation_0-logloss:0.63835 
validation_1-logloss:0.65050 [281] validation_0-logloss:0.63907 validation_1-logloss:0.65138 [282] validation_0-logloss:0.63830 validation_1-logloss:0.65059 [283] validation_0-logloss:0.63838 validation_1-logloss:0.65067 [284] validation_0-logloss:0.63840 validation_1-logloss:0.65069 [285] validation_0-logloss:0.63936 validation_1-logloss:0.65168 [286] validation_0-logloss:0.63883 validation_1-logloss:0.65113 [287] validation_0-logloss:0.63973 validation_1-logloss:0.65205 [288] validation_0-logloss:0.64116 validation_1-logloss:0.65352 [289] validation_0-logloss:0.64274 validation_1-logloss:0.65515 [290] validation_0-logloss:0.64274 validation_1-logloss:0.65514 [291] validation_0-logloss:0.64171 validation_1-logloss:0.65408 [292] validation_0-logloss:0.64155 validation_1-logloss:0.65392 [293] validation_0-logloss:0.64160 validation_1-logloss:0.65398 [294] validation_0-logloss:0.64052 validation_1-logloss:0.65366 [295] validation_0-logloss:0.64172 validation_1-logloss:0.65489 [296] validation_0-logloss:0.64155 validation_1-logloss:0.65471 [297] validation_0-logloss:0.64262 validation_1-logloss:0.65582 [298] validation_0-logloss:0.64183 validation_1-logloss:0.65495 [299] validation_0-logloss:0.64171 validation_1-logloss:0.65483 [300] validation_0-logloss:0.64073 validation_1-logloss:0.65317 [301] validation_0-logloss:0.64145 validation_1-logloss:0.65391 [302] validation_0-logloss:0.64157 validation_1-logloss:0.65403 [303] validation_0-logloss:0.64216 validation_1-logloss:0.65464 [304] validation_0-logloss:0.64204 validation_1-logloss:0.65451 [305] validation_0-logloss:0.64372 validation_1-logloss:0.65622 [306] validation_0-logloss:0.64351 validation_1-logloss:0.65601 [307] validation_0-logloss:0.64262 validation_1-logloss:0.65427 [308] validation_0-logloss:0.64278 validation_1-logloss:0.65444 [309] validation_0-logloss:0.64290 validation_1-logloss:0.65456 [310] validation_0-logloss:0.64395 validation_1-logloss:0.65563 [311] validation_0-logloss:0.64371 
validation_1-logloss:0.65538 [312] validation_0-logloss:0.64332 validation_1-logloss:0.65498 [313] validation_0-logloss:0.64344 validation_1-logloss:0.65384 [314] validation_0-logloss:0.64495 validation_1-logloss:0.65537 [315] validation_0-logloss:0.64645 validation_1-logloss:0.65689 [316] validation_0-logloss:0.64646 validation_1-logloss:0.65691 [317] validation_0-logloss:0.64635 validation_1-logloss:0.65679 [318] validation_0-logloss:0.64651 validation_1-logloss:0.65696 [319] validation_0-logloss:0.64582 validation_1-logloss:0.65625 [320] validation_0-logloss:0.64802 validation_1-logloss:0.65848 [321] validation_0-logloss:0.64893 validation_1-logloss:0.65940 [322] validation_0-logloss:0.64900 validation_1-logloss:0.65948 [323] validation_0-logloss:0.65005 validation_1-logloss:0.66054 [324] validation_0-logloss:0.65001 validation_1-logloss:0.66051 [325] validation_0-logloss:0.64974 validation_1-logloss:0.66040 [326] validation_0-logloss:0.64963 validation_1-logloss:0.66030 [327] validation_0-logloss:0.64969 validation_1-logloss:0.66035 [328] validation_0-logloss:0.64984 validation_1-logloss:0.66050 [329] validation_0-logloss:0.64890 validation_1-logloss:0.65956 [330] validation_0-logloss:0.64730 validation_1-logloss:0.65793 [331] validation_0-logloss:0.64854 validation_1-logloss:0.65919 [332] validation_0-logloss:0.64948 validation_1-logloss:0.66014 [333] validation_0-logloss:0.64862 validation_1-logloss:0.65927 [334] validation_0-logloss:0.64879 validation_1-logloss:0.65944 [335] validation_0-logloss:0.64807 validation_1-logloss:0.65872 [336] validation_0-logloss:0.64668 validation_1-logloss:0.65730 [337] validation_0-logloss:0.64621 validation_1-logloss:0.65682 [338] validation_0-logloss:0.64615 validation_1-logloss:0.65676 [339] validation_0-logloss:0.64613 validation_1-logloss:0.65674 [340] validation_0-logloss:0.64592 validation_1-logloss:0.65653 [341] validation_0-logloss:0.64485 validation_1-logloss:0.65544 [342] validation_0-logloss:0.64398 
validation_1-logloss:0.65456 [343] validation_0-logloss:0.64401 validation_1-logloss:0.65459 [344] validation_0-logloss:0.64374 validation_1-logloss:0.65432 [345] validation_0-logloss:0.64482 validation_1-logloss:0.65542 [346] validation_0-logloss:0.64491 validation_1-logloss:0.65551 [347] validation_0-logloss:0.64494 validation_1-logloss:0.65554 [348] validation_0-logloss:0.64499 validation_1-logloss:0.65559 [349] validation_0-logloss:0.64473 validation_1-logloss:0.65533 [350] validation_0-logloss:0.64462 validation_1-logloss:0.65521 [351] validation_0-logloss:0.64464 validation_1-logloss:0.65524 [352] validation_0-logloss:0.64461 validation_1-logloss:0.65520 [353] validation_0-logloss:0.64344 validation_1-logloss:0.65401 [354] validation_0-logloss:0.64255 validation_1-logloss:0.65311 [355] validation_0-logloss:0.64239 validation_1-logloss:0.65295 [356] validation_0-logloss:0.64212 validation_1-logloss:0.65268 [357] validation_0-logloss:0.64237 validation_1-logloss:0.65293 [358] validation_0-logloss:0.64314 validation_1-logloss:0.65371 [359] validation_0-logloss:0.64284 validation_1-logloss:0.65341 [360] validation_0-logloss:0.64394 validation_1-logloss:0.65452 [361] validation_0-logloss:0.64374 validation_1-logloss:0.65432 [362] validation_0-logloss:0.64377 validation_1-logloss:0.65435 [363] validation_0-logloss:0.64307 validation_1-logloss:0.65364 [364] validation_0-logloss:0.64245 validation_1-logloss:0.65325 [365] validation_0-logloss:0.64244 validation_1-logloss:0.65324 [366] validation_0-logloss:0.64368 validation_1-logloss:0.65476 [367] validation_0-logloss:0.64353 validation_1-logloss:0.65461 [368] validation_0-logloss:0.64247 validation_1-logloss:0.65353 [369] validation_0-logloss:0.64273 validation_1-logloss:0.65380 [370] validation_0-logloss:0.64363 validation_1-logloss:0.65471 [371] validation_0-logloss:0.64362 validation_1-logloss:0.65470 [372] validation_0-logloss:0.64362 validation_1-logloss:0.65470 [373] validation_0-logloss:0.64296 
validation_1-logloss:0.65404 [374] validation_0-logloss:0.64366 validation_1-logloss:0.65474 [375] validation_0-logloss:0.64472 validation_1-logloss:0.65581 [376] validation_0-logloss:0.64488 validation_1-logloss:0.65646 [377] validation_0-logloss:0.64495 validation_1-logloss:0.65652 [378] validation_0-logloss:0.64494 validation_1-logloss:0.65652 [379] validation_0-logloss:0.64360 validation_1-logloss:0.65473 [380] validation_0-logloss:0.64342 validation_1-logloss:0.65455 [381] validation_0-logloss:0.64402 validation_1-logloss:0.65515 [382] validation_0-logloss:0.64554 validation_1-logloss:0.65669 [383] validation_0-logloss:0.64527 validation_1-logloss:0.65642 [384] validation_0-logloss:0.64402 validation_1-logloss:0.65515 [385] validation_0-logloss:0.64405 validation_1-logloss:0.65518 [386] validation_0-logloss:0.64272 validation_1-logloss:0.65384 [387] validation_0-logloss:0.64247 validation_1-logloss:0.65359 [388] validation_0-logloss:0.64353 validation_1-logloss:0.65465 [389] validation_0-logloss:0.64377 validation_1-logloss:0.65490 [390] validation_0-logloss:0.64470 validation_1-logloss:0.65584 [391] validation_0-logloss:0.64445 validation_1-logloss:0.65559 [392] validation_0-logloss:0.64459 validation_1-logloss:0.65573 [393] validation_0-logloss:0.64435 validation_1-logloss:0.65549 [394] validation_0-logloss:0.64428 validation_1-logloss:0.65542 [395] validation_0-logloss:0.64446 validation_1-logloss:0.65560 [396] validation_0-logloss:0.64426 validation_1-logloss:0.65539 [397] validation_0-logloss:0.64302 validation_1-logloss:0.65443 [398] validation_0-logloss:0.64282 validation_1-logloss:0.65423 [399] validation_0-logloss:0.64147 validation_1-logloss:0.65286 [400] validation_0-logloss:0.64130 validation_1-logloss:0.65269 [401] validation_0-logloss:0.64123 validation_1-logloss:0.65262 [402] validation_0-logloss:0.64113 validation_1-logloss:0.65253 [403] validation_0-logloss:0.64134 validation_1-logloss:0.65274 [404] validation_0-logloss:0.64130 
validation_1-logloss:0.65269 [405] validation_0-logloss:0.63991 validation_1-logloss:0.65129 [406] validation_0-logloss:0.63984 validation_1-logloss:0.65122 [407] validation_0-logloss:0.64062 validation_1-logloss:0.65201 [408] validation_0-logloss:0.64083 validation_1-logloss:0.65222 [409] validation_0-logloss:0.64217 validation_1-logloss:0.65358 [410] validation_0-logloss:0.64331 validation_1-logloss:0.65473 [411] validation_0-logloss:0.64346 validation_1-logloss:0.65488 [412] validation_0-logloss:0.64333 validation_1-logloss:0.65474 [413] validation_0-logloss:0.64348 validation_1-logloss:0.65490 [414] validation_0-logloss:0.64215 validation_1-logloss:0.65356 [415] validation_0-logloss:0.64205 validation_1-logloss:0.65345 [416] validation_0-logloss:0.64236 validation_1-logloss:0.65376 [417] validation_0-logloss:0.64398 validation_1-logloss:0.65540 [418] validation_0-logloss:0.64371 validation_1-logloss:0.65536 [419] validation_0-logloss:0.64372 validation_1-logloss:0.65537 [420] validation_0-logloss:0.64433 validation_1-logloss:0.65607 [421] validation_0-logloss:0.64407 validation_1-logloss:0.65580 [422] validation_0-logloss:0.64397 validation_1-logloss:0.65570 [423] validation_0-logloss:0.64439 validation_1-logloss:0.65613 [424] validation_0-logloss:0.64374 validation_1-logloss:0.65547 [425] validation_0-logloss:0.64389 validation_1-logloss:0.65562 [426] validation_0-logloss:0.64358 validation_1-logloss:0.65531 [427] validation_0-logloss:0.64356 validation_1-logloss:0.65529 [428] validation_0-logloss:0.64326 validation_1-logloss:0.65499 [429] validation_0-logloss:0.64180 validation_1-logloss:0.65350 [430] validation_0-logloss:0.64188 validation_1-logloss:0.65359 [431] validation_0-logloss:0.64209 validation_1-logloss:0.65380 [432] validation_0-logloss:0.64268 validation_1-logloss:0.65439 [433] validation_0-logloss:0.64259 validation_1-logloss:0.65430 [434] validation_0-logloss:0.64394 validation_1-logloss:0.65568 [435] validation_0-logloss:0.64501 
validation_1-logloss:0.65676 [436] validation_0-logloss:0.64506 validation_1-logloss:0.65680 [437] validation_0-logloss:0.64494 validation_1-logloss:0.65669 [438] validation_0-logloss:0.64500 validation_1-logloss:0.65674 [439] validation_0-logloss:0.64637 validation_1-logloss:0.65813 [440] validation_0-logloss:0.64627 validation_1-logloss:0.65803 [441] validation_0-logloss:0.64626 validation_1-logloss:0.65803 [442] validation_0-logloss:0.64742 validation_1-logloss:0.65920 [443] validation_0-logloss:0.64731 validation_1-logloss:0.65909 [444] validation_0-logloss:0.64734 validation_1-logloss:0.65912 [445] validation_0-logloss:0.64775 validation_1-logloss:0.65953 [446] validation_0-logloss:0.64796 validation_1-logloss:0.65975 [447] validation_0-logloss:0.64807 validation_1-logloss:0.65986 [448] validation_0-logloss:0.64805 validation_1-logloss:0.65984 [449] validation_0-logloss:0.64890 validation_1-logloss:0.66070 [450] validation_0-logloss:0.64894 validation_1-logloss:0.66074 [451] validation_0-logloss:0.64855 validation_1-logloss:0.66034 [452] validation_0-logloss:0.64978 validation_1-logloss:0.66159 [453] validation_0-logloss:0.64996 validation_1-logloss:0.66177 [454] validation_0-logloss:0.65091 validation_1-logloss:0.66273 [455] validation_0-logloss:0.64969 validation_1-logloss:0.66150 [456] validation_0-logloss:0.65054 validation_1-logloss:0.66236 [457] validation_0-logloss:0.65207 validation_1-logloss:0.66390 [458] validation_0-logloss:0.65172 validation_1-logloss:0.66355 [459] validation_0-logloss:0.65203 validation_1-logloss:0.66386 [460] validation_0-logloss:0.65198 validation_1-logloss:0.66382 [461] validation_0-logloss:0.65288 validation_1-logloss:0.66472 [462] validation_0-logloss:0.65272 validation_1-logloss:0.66456 [463] validation_0-logloss:0.65231 validation_1-logloss:0.66415 [464] validation_0-logloss:0.65238 validation_1-logloss:0.66422 [465] validation_0-logloss:0.65264 validation_1-logloss:0.66448 [466] validation_0-logloss:0.65296 
validation_1-logloss:0.66481 [467] validation_0-logloss:0.65186 validation_1-logloss:0.66370 [468] validation_0-logloss:0.65188 validation_1-logloss:0.66372 [469] validation_0-logloss:0.65205 validation_1-logloss:0.66388 [470] validation_0-logloss:0.65197 validation_1-logloss:0.66380 [471] validation_0-logloss:0.65215 validation_1-logloss:0.66399 [472] validation_0-logloss:0.65185 validation_1-logloss:0.66368 [473] validation_0-logloss:0.65179 validation_1-logloss:0.66362 [474] validation_0-logloss:0.65308 validation_1-logloss:0.66493
# Wrap the trained pipeline and shared preprocessor in an Evaluator.
evaluator = Evaluator(model=clf.pipeline, preprocessor=preprocessor)
# Evaluate the model
evaluator.evaluate(x_test, y_test)
# Plot the train/validation log-loss curves collected during training.
evaluator.plot_log_loss(history)
# Predict the images in a directory
evaluator.predict_images_in_dir("dataset_18/testing", ['covid', 'normal'])
precision recall f1-score support
0 0.86 0.70 0.78 27
1 0.81 0.92 0.86 37
accuracy 0.83 64
macro avg 0.84 0.81 0.82 64
weighted avg 0.83 0.83 0.82 64
Accuracy: 82.8125%
Precision: 83.23525432900433%
Recall: 82.8125%
F1 Score: 82.47949496254196%
AUC-ROC: 81.13113113113114%
Image Name: Normal-375.png
covid = 8.557015657424927% normal = 91.44298434257507% The predicted image is : normal Image Name: COVID-443.png
covid = 44.83419060707092% normal = 55.16580939292908% The predicted image is : normal Image Name: Normal-354.png
covid = 3.8111984729766846% normal = 96.18880152702332% The predicted image is : normal Image Name: COVID-484.png
covid = 89.93508815765381% normal = 10.064911842346191% The predicted image is : covid Ignored .DS_Store Image Name: COVID-415.png
covid = 64.64850902557373% normal = 35.35149395465851% The predicted image is : covid Image Name: Normal-332.png
covid = 6.206619739532471% normal = 93.79338026046753% The predicted image is : normal
# Train and evaluate an SVM variant of the classifier.
# The same train/validation splits are passed so fit-time evaluation
# mirrors the other model runs in this notebook.
clf = Classifier(validation=[(x_train, y_train), (x_val, y_val)])
# Hyperparameter search for the SVM (the C / gamma Bayesian-optimization
# table in the output below is produced by this call)
clf.optimize(x_train, y_train,"svm")
# Fit the SVM on the training split; `history` is kept for parity with
# the other runs (no log-loss plot is drawn for this model)
history = clf.train(x_train, y_train,"svm")
# Persist the fitted pipeline for later reuse
clf.save_model("./mlModels/svmModel.joblib")
evaluator = Evaluator(model=clf.pipeline, preprocessor=preprocessor)
# Evaluate the model
# NOTE(review): the report below shows the SVM predicting only class 1
# (identical probabilities for every image) — the optimization may have
# degenerated; worth re-checking the search bounds.
evaluator.evaluate(x_test, y_test)
# Predict the images in a directory
evaluator.predict_images_in_dir("dataset_18/testing", ['covid', 'normal'])
| iter | target | C | gamma |
-------------------------------------------------
| 1 | 0.581 | 41.76 | 0.7204 |
| 2 | 0.581 | 0.1114 | 0.3024 |
| 3 | 0.581 | 14.76 | 0.09243 |
| 4 | 0.581 | 99.99 | 0.1198 |
| 5 | 0.581 | 99.98 | 0.7918 |
=================================================
precision recall f1-score support
0 0.00 0.00 0.00 27
1 0.58 1.00 0.73 37
accuracy 0.58 64
macro avg 0.29 0.50 0.37 64
weighted avg 0.33 0.58 0.42 64
Accuracy: 57.8125%
Precision: 33.4228515625%
Recall: 57.8125%
F1 Score: 42.35767326732673%
AUC-ROC: 50.0%
Image Name: Normal-375.png
covid = 41.58763959125898% normal = 58.41236040874104% The predicted image is : normal Image Name: COVID-443.png
covid = 41.58763959125898% normal = 58.41236040874104% The predicted image is : normal Image Name: Normal-354.png
covid = 41.58763959125898% normal = 58.41236040874104% The predicted image is : normal Image Name: COVID-484.png
covid = 41.58763959125898% normal = 58.41236040874104% The predicted image is : normal Ignored .DS_Store Image Name: COVID-415.png
covid = 41.58763959125898% normal = 58.41236040874104% The predicted image is : normal Image Name: Normal-332.png
covid = 41.58763959125898% normal = 58.41236040874104% The predicted image is : normal
# Train and evaluate a random-forest variant of the classifier,
# following the same optimize → train → save → evaluate sequence used
# for the other models in this notebook.
clf = Classifier(validation=[(x_train, y_train), (x_val, y_val)])
# Hyperparameter search over max_depth / max_features / n_estimators
# (see the optimization table in the output below)
clf.optimize(x_train, y_train,"rf")
# Fit the random forest; `history` is kept for parity with the other runs
history = clf.train(x_train, y_train,"rf")
# Persist the fitted pipeline for later reuse
clf.save_model("./mlModels/rfModel.joblib")
evaluator = Evaluator(model=clf.pipeline, preprocessor=preprocessor)
# Evaluate the model
evaluator.evaluate(x_test, y_test)
# Predict the images in a directory
evaluator.predict_images_in_dir("dataset_18/testing", ['covid', 'normal'])
| iter | target | max_depth | max_fe... | n_esti... | ------------------------------------------------------------- | 1 | 0.7769 | 4.668 | 0.3881 | 100.1 | | 2 | 0.804 | 4.209 | 0.1587 | 183.1 | | 3 | 0.7702 | 3.745 | 0.2382 | 457.1 | | 4 | 0.7804 | 3.94 | 0.4292 | 182.1 | | 5 | 0.7974 | 5.129 | 0.415 | 257.4 | ============================================================= precision recall f1-score support 0 0.81 0.63 0.71 27 1 0.77 0.89 0.82 37 accuracy 0.78 64 macro avg 0.79 0.76 0.77 64 weighted avg 0.79 0.78 0.78 64 Accuracy: 78.125% Precision: 78.51951827242524% Recall: 78.125% F1 Score: 77.578125% AUC-ROC: 76.07607607607608% Image Name: Normal-375.png
covid = 12.763513878436731% normal = 87.23648612156327% The predicted image is : normal Image Name: COVID-443.png
covid = 45.04922333830868% normal = 54.95077666169132% The predicted image is : normal Image Name: Normal-354.png
covid = 8.450049112688546% normal = 91.54995088731145% The predicted image is : normal Image Name: COVID-484.png
covid = 76.38132796072158% normal = 23.61867203927844% The predicted image is : covid Ignored .DS_Store Image Name: COVID-415.png
covid = 58.35373880507504% normal = 41.646261194924975% The predicted image is : covid Image Name: Normal-332.png
covid = 12.072219952244348% normal = 87.92778004775568% The predicted image is : normal